var runtime.physPageSize
52 uses
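For reference, the declaration this page indexes (malloc.go#L354), quoted from the Go runtime source; the comment wording may vary slightly across releases:

// physPageSize is the size in bytes of the OS's physical pages.
// Mapping and unmapping operations must be done at multiples of
// physPageSize.
//
// This must be set by the OS init code (typically in osinit) before
// mallocinit.
var physPageSize uintptr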
runtime (current package)
arena.go#L209: if userArenaChunkBytes%physPageSize != 0 {
arena.go#L827: if uintptr(x)%physPageSize != 0 {
malloc.go#L354: var physPageSize uintptr
malloc.go#L385: if physPageSize == 0 {
malloc.go#L389: if physPageSize > maxPhysPageSize {
malloc.go#L390: print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n")
malloc.go#L393: if physPageSize < minPhysPageSize {
malloc.go#L394: print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
malloc.go#L397: if physPageSize&(physPageSize-1) != 0 {
malloc.go#L398: print("system page size (", physPageSize, ") must be a power of 2\n")
malloc.go#L797: size = physPageSize
malloc.go#L2032: if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
mem_linux.go#L42: if uintptr(v)&(physPageSize-1) != 0 || n&(physPageSize-1) != 0 {
mem_linux.go#L111: if uintptr(v)&(physPageSize-1) != 0 {
mem_linux.go#L120: if uintptr(v)&(physPageSize-1) != 0 {
mgcscavenge.go#L211: gcPercentGoal = (gcPercentGoal + uint64(physPageSize) - 1) &^ (uint64(physPageSize) - 1)
mgcscavenge.go#L229: if heapRetainedNow <= gcPercentGoal || heapRetainedNow-gcPercentGoal < uint64(physPageSize) {
mgcscavenge.go#L613: worked += approxWorkedNSPerPhysicalPage * float64(r/physPageSize)
mgcscavenge.go#L634: if released > 0 && released < physPageSize {
mgcscavenge.go#L749: minPages := physPageSize / pageSize
mgcscavenge.go#L962: if physHugePageSize > pageSize && physHugePageSize > physPageSize {
mheap.go#L1186: needPhysPageAlign := physPageAlignedStacks && typ == spanAllocStack && pageSize < physPageSize
mheap.go#L1220: extraPages := physPageSize / pageSize
mheap.go#L1243: base = alignUp(base, physPageSize)
mheap.go#L1494: nBase := alignUp(end, physPageSize)
mheap.go#L1537: nBase = alignUp(h.curArena.base+ask, physPageSize)
mpagealloc_64bit.go#L78: b := alignUp(uintptr(entries)*pallocSumBytes, physPageSize)
mpagealloc_64bit.go#L118: baseOffset := alignDown(uintptr(sumIdxBase)*pallocSumBytes, physPageSize)
mpagealloc_64bit.go#L119: limitOffset := alignUp(uintptr(sumIdxLimit)*pallocSumBytes, physPageSize)
mpagealloc_64bit.go#L210: needMin := alignDown(uintptr(chunkIndex(base)), physPageSize/scSize)
mpagealloc_64bit.go#L211: needMax := alignUp(uintptr(chunkIndex(limit)), physPageSize/scSize)
os_linux.go#L274: physPageSize = n
os_linux.go#L278: if physPageSize == 0 {
os_linux.go#L279: physPageSize = size
os_linux.go#L317: physPageSize = val
runtime.go#L125: func syscall_Getpagesize() int { return int(physPageSize) }
signal_unix.go#L875: if n > physPageSize-pc%physPageSize {
signal_unix.go#L876: n = physPageSize - pc%physPageSize
stack.go#L352: n = uint32(alignUp(uintptr(n), physPageSize))
vgetrandom_linux.go#L51: allocSize := (num*stateSizeCacheAligned + physPageSize - 1) &^ (physPageSize - 1)
vgetrandom_linux.go#L52: num = (physPageSize / stateSizeCacheAligned) * (allocSize / physPageSize)
vgetrandom_linux.go#L63: if (newBlock&(physPageSize-1))+vgetrandomAlloc.stateSize > physPageSize {
vgetrandom_linux.go#L64: newBlock = (newBlock + physPageSize - 1) &^ (physPageSize - 1)
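Two idioms recur throughout these uses: rounding a size or address to a physPageSize boundary (alignUp/alignDown in mheap.go and mpagealloc_64bit.go, or the explicit (x + p - 1) &^ (p - 1) form in mgcscavenge.go#L211 and vgetrandom_linux.go#L51/#L64) and testing page alignment with x & (p-1) (mem_linux.go#L42). Below is a minimal sketch of both, runnable outside the runtime: alignUp and alignDown here are local stand-ins for the runtime-internal helpers, and the page size is obtained through os.Getpagesize, which reports physPageSize via the linknamed syscall_Getpagesize (runtime.go#L125).

package main

import (
	"fmt"
	"os"
)

// alignUp rounds n up to a multiple of align; align must be a power of 2.
// Same bit trick as mgcscavenge.go#L211 and vgetrandom_linux.go#L51.
func alignUp(n, align uintptr) uintptr {
	return (n + align - 1) &^ (align - 1)
}

// alignDown rounds n down to a multiple of align (a power of 2).
func alignDown(n, align uintptr) uintptr {
	return n &^ (align - 1)
}

func main() {
	// os.Getpagesize returns physPageSize via runtime.go#L125.
	page := uintptr(os.Getpagesize())

	// mallocinit's invariant (malloc.go#L397): a power of 2 shares no
	// set bits with its predecessor, so the masks below are valid.
	fmt.Println("power of 2:", page&(page-1) == 0)

	n := uintptr(12345)
	up, down := alignUp(n, page), alignDown(n, page)
	fmt.Println(up, down) // e.g. 16384 and 12288 with 4 KiB pages

	// Alignment check as in mem_linux.go#L42: no low bits set under the
	// page mask means the value is page-aligned.
	fmt.Println("aligned:", up&(page-1) == 0)
}

Both tricks depend on the page size being a power of 2, which is exactly the property mallocinit verifies at malloc.go#L397 before any of the mask arithmetic above is used.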